In [1]:
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib
%matplotlib inline
In [15]:
# ops are added to the default graph by default
matrix1 = tf.constant([[2.,3.]])
matrix2 = tf.constant([[2.,3.],[2.,3.]])
product = tf.matmul(matrix1,matrix2)
In [16]:
# launch the default graph in a session and fetch the matmul result
sess = tf.Session()
result = sess.run(product)
print(result)
sess.close()
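In [ ]:
# Sketch added for illustration (not part of the original run): sess.run can
# fetch several ops in one call, so shared parts of the graph run only once.
# `square` is a new illustrative op on the matrices defined above.
sess = tf.Session()
square = tf.matmul(matrix2, matrix2)
prod_val, square_val = sess.run([product, square])
print(prod_val)
print(square_val)
sess.close()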
In [19]:
sess = tf.InteractiveSession()
x = tf.Variable([1.,2.])
a = tf.constant([3.,4.])
# variables must be initialized before use
x.initializer.run()
# subtract x from a
sub = tf.subtract(a, x)
sub.eval()
sess.close()
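In [ ]:
# Sketch for comparison (assumed TF 1.x API, not in the original notebook):
# the same subtraction with a plain Session, where eval needs an explicit
# session argument. x2 and a2 are illustrative names.
x2 = tf.Variable([1., 2.])
a2 = tf.constant([3., 4.])
with tf.Session() as sess:
    sess.run(x2.initializer)
    print(tf.subtract(a2, x2).eval(session=sess))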
In [31]:
# reset the graph
tf.reset_default_graph()
# counter
state = tf.Variable(0,name="counter")
# constant
one = tf.constant(1)
# new state
new_state = tf.add(state,one)
# assign new_state to state
# note that assign returns a reference to state
update = tf.assign(state, new_state)
# init op
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # initialize variables
    sess.run(init)
    # print the initial state
    print(sess.run(state))
    # update three times
    for _ in range(3):
        print(sess.run(update))
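In [ ]:
# A minimal sketch (added for illustration): tf.assign_add folds the
# add + assign pair above into a single op, giving the same counter.
tf.reset_default_graph()
state = tf.Variable(0, name="counter")
update = tf.assign_add(state, 1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(3):
        print(sess.run(update))  # prints 1, 2, 3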
In [38]:
tf.reset_default_graph()
# placeholders are fed at run time; None leaves the batch size flexible
input1 = tf.placeholder(tf.float32, shape=[None, 2])
input2 = tf.placeholder(tf.float32, shape=[2, 1])
output = tf.matmul(input1, input2)
with tf.Session() as sess:
    res = sess.run([output], feed_dict={input1: [[4., 5.]], input2: [[3.], [5.]]})
    print(res)
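In [ ]:
# Sketch (not in the original): the None in input1's shape lets the same
# graph accept any batch size, so two rows can be fed at once.
with tf.Session() as sess:
    batch = [[4., 5.], [1., 2.]]
    res = sess.run(output, feed_dict={input1: batch, input2: [[3.], [5.]]})
    print(res)  # one output row per input row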
In [5]:
with tf.Session() as sess:
    logits = tf.Variable([[0.2, 0.3, 0.5], [0.3, 0.3, 0.4]], name='logits')
    labels = [0, 1]
    init = tf.global_variables_initializer()
    sess.run(init)
    # in_top_k checks whether each true label is among the top-1 logits
    correct_number = tf.nn.in_top_k(logits, labels, 1)
    num_correct = tf.reduce_sum(tf.cast(correct_number, tf.int32))
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                                   logits=logits,
                                                                   name='xentropy')
    print(sess.run(num_correct))
    print(sess.run(cross_entropy))
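In [ ]:
# Sanity-check sketch (assumed, not in the original notebook): recompute the
# sparse softmax cross entropy with NumPy: loss_i = -log(softmax(logits_i)[label_i]).
# logits_np and labels_np mirror the values in the cell above.
logits_np = np.array([[0.2, 0.3, 0.5], [0.3, 0.3, 0.4]])
labels_np = [0, 1]
exp = np.exp(logits_np - logits_np.max(axis=1, keepdims=True))
softmax = exp / exp.sum(axis=1, keepdims=True)
loss = -np.log(softmax[np.arange(2), labels_np])
print(loss)  # should match the TF values above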
In [22]:
# adaptive learning-rate policy
# leaves learning_rate_base unchanged and just returns the current rate
def learning_policy(learning_rate_base, global_step, decay_step, decay_rate):
    # lr = base * decay_rate^((step + 1) / decay_step)
    decay_size = tf.to_float(global_step + 1) / decay_step
    learning_rate_now = learning_rate_base * tf.pow(decay_rate, decay_size)
    return learning_rate_now

with tf.Session() as sess:
    learning_rate_base = tf.Variable(0.1, dtype=tf.float32, name='learning_rate', trainable=False)
    global_step = tf.Variable(0, name='global_step', dtype=tf.int64, trainable=False)
    # rebind global_step to the increment op so every run advances the step
    global_step = tf.assign_add(global_step, 1)
    learning_rate_now = learning_policy(learning_rate_base,
                                        global_step,
                                        10,
                                        0.1)
    init = tf.global_variables_initializer()
    sess.run(init)
    for _ in range(30):
        print(sess.run(learning_rate_now))
In [19]:
tf.train.exponential_decay?
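In [ ]:
# Sketch using the built-in schedule (added for illustration):
# tf.train.exponential_decay computes
# learning_rate * decay_rate^(global_step / decay_steps), which matches the
# hand-rolled learning_policy above; staircase=True would floor the exponent.
tf.reset_default_graph()
global_step = tf.Variable(0, dtype=tf.int64, trainable=False)
increment = tf.assign_add(global_step, 1)
lr = tf.train.exponential_decay(0.1, global_step,
                                decay_steps=10, decay_rate=0.1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(30):
        sess.run(increment)
        print(sess.run(lr))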